In [1]:
from IPython.display import HTML

def hide_code():
    """Return an HTML widget with a button that toggles visibility of all code cells."""
    toggle_snippet = (
        "<script>code_show=true; function code_toggle() {if (code_show){$('div.input').hide();} else {$('div.input').show();}code_show = !code_show} $( document ).ready(code_toggle);</script>"
        '<form action="javascript:code_toggle()">'
        '<input type="submit" value="Click here to toggle on/off the raw code.">'
        "</form>"
    )
    return HTML(toggle_snippet)

hide_code()
Out[1]:

Performance table

The training and validation performances of each model are shown below with $\textbf{Mean Squared Error (MSE)}$ and $\textbf{Mean Absolute Percentage Error (MAPE)}$ as the chosen performance criteria. Results obtained with linear regression $\textbf{(LinReg)}$ are also added for comparison. Each value is rounded with a relative error less than 1%.

In [2]:
import pandas as pd
import numpy as np
from utils.df_handler import get_significant_figures

# Collect mean train/val errors for every (metric, feature set, model)
# combination; each appended row interleaves train and val means per target.
errors = []
for error_type in ['se', 'ape']:  # 'se' -> MSE rows, 'ape' -> MAPE rows
    for data_type in ['LOB', 'LIQ', 'LOB+LIQ']:
        # Error files are pickled dicts keyed by e.g. 'se_train', 'se_val'.
        # NOTE: don't shadow the builtin `input`; pass allow_pickle as a bool.
        with open(f'./eval/models/errors/{data_type}.npy', 'rb') as fin:
            nn_errors = np.load(fin, allow_pickle=True).item()
        with open(f'./eval/models/errors/LinReg_{data_type}.npy', 'rb') as fin:
            linreg_errors = np.load(fin, allow_pickle=True).item()
        for model_errors in (nn_errors, linreg_errors):
            train_means = np.mean(model_errors[error_type + '_train'], 0)
            val_means = np.mean(model_errors[error_type + '_val'], 0)
            # Interleave as [train_0, val_0, train_1, val_1, ...] to match
            # the (target, train/val) column MultiIndex below.
            errors.append(
                np.ravel([[t, v] for t, v in zip(train_means, val_means)]).tolist())

# Row index: metric -> feature set -> model.
indices = pd.MultiIndex.from_tuples(
    [(i, j, k)
     for i in ['MSE', 'MAPE (%)']
     for j in ['LOB', 'LIQ', 'LOB+LIQ']
     for k in ['NN', 'LinReg']])

# Column index: forecast target -> train/val split.
columns = pd.MultiIndex.from_tuples(
    [(i, k)
     for i in ['mid price', 'bid price expectation', 'ask price expectation',
               'bid price variance', 'ask price variance']
     for k in ['train', 'val']])

errors_df = pd.DataFrame(errors, index=indices, columns=columns)
# Round to 3 significant figures (relative error < 1%); the bare expression
# displays the rounded table as the cell output.
get_significant_figures(errors_df, 3, 0.01)
Out[2]:
mid price bid price expectation ask price expectation bid price variance ask price variance
train val train val train val train val train val
MSE LOB NN 0.472•E-3 0.403•E-2 0.47•E-3 0.405•E-2 0.474•E-3 0.403•E-2 0.973•E-9 0.126•E-8 0.959•E-9 0.148•E-8
LinReg 0.956•E-2 0.589•E-2 0.956•E-2 0.588•E-2 0.956•E-2 0.588•E-2 0.559•E-9 0.102•E-8 0.553•E-9 0.131•E-8
LIQ NN 0.179•E-3 0.406•E-2 0.185•E-3 0.417•E-2 0.181•E-3 0.448•E-2 0.975•E-9 0.127•E-8 0.959•E-9 0.149•E-8
LinReg 0.322•E-2 0.294•E-2 0.323•E-2 0.295•E-2 0.32•E-2 0.295•E-2 0.11•E-8 0.142•E-8 0.942•E-9 0.165•E-8
LOB+LIQ NN 0.122•E-3 0.226•E-2 0.124•E-3 0.224•E-2 0.116•E-3 0.211•E-2 0.975•E-9 0.126•E-8 0.958•E-9 0.149•E-8
LinReg 0.296•E-2 0.364•E-2 0.296•E-2 0.365•E-2 0.296•E-2 0.365•E-2 0.542•E-9 0.103•E-8 0.537•E-9 0.136•E-8
MAPE (%) LOB NN 0.161 0.435 0.161 0.436 0.16 0.433 14.73 17.78 14.77 22.89
LinReg 0.911 0.599 0.913 0.6 0.908 0.597 10.17 14.62 10.23 19.08
LIQ NN 0.103 0.394 0.105 0.4 0.102 0.407 14.74 17.93 14.77 23.12
LinReg 0.445 0.415 0.447 0.416 0.443 0.413 15.12 18.52 14.35 21.89
LOB+LIQ NN 0.864•E-1 0.283 0.867•E-1 0.287 0.838•E-1 0.273 14.75 17.8 14.76 23.17
LinReg 0.422 0.418 0.423 0.419 0.421 0.417 10.05 14.64 10.14 18.59

Performance Plots

Below we have plots illustrating the validation performances of the models.

NN : Feedforward Neural Network

LinReg : Linear Model

In [3]:
from utils.plotter import plotter

# Duplicate NN/LinReg labels are padded with spaces so matplotlib treats each
# of the six tick labels as distinct.
model_names = [' '*i + k + ' '*i for i in range(3) for k in ['NN', 'LinReg']]
errors_df_rounded = get_significant_figures(errors_df, 3, 0.01)


def make_axis_args(observable, title, legend_loc):
    """Build the plotter argument list for one observable's validation axis.

    Parameters
    ----------
    observable : str
        Top-level column of ``errors_df`` (e.g. 'mid price').
    title : str
        Title drawn above the axis.
    legend_loc : str
        Matplotlib legend location (e.g. 'upper left').

    Returns
    -------
    list
        Positional per-axis arguments; entry order must match ``attrs``.
    """
    mse = errors_df[(observable, 'val')]['MSE'].values
    mape = errors_df[(observable, 'val')]['MAPE (%)'].values
    mse_rounded = errors_df_rounded[(observable, 'val')]['MSE'].values.tolist()
    mape_rounded = errors_df_rounded[(observable, 'val')]['MAPE (%)'].values.tolist()
    return [
        # 'plot': one MSE marker per model
        [model_names, mse, 'X', dict(color='blue', label='MSE', markersize=15, alpha=0.5)],
        # 'hlines': dashed guides from each marker towards the y-axis
        [mse, [*range(len(model_names))], [-1]*len(model_names), 'blue', 'dashed', dict(linewidth=0.4)],
        # 'set_xlim', 'set_ylim'
        [-0.5, len(model_names) - 0.5], None,
        # 'make_twiny': MAPE markers on a twin axis with its own styling
        {'plot': [model_names, mape, 'o'],
         'plot_specs': dict(marker='o', color='limegreen', label='MAPE', markersize=20, mfc='None'),
         'hline_specs': dict(linewidth=0.4),
         'ylabel': dict(ylabel='MAPE (%)', fontsize=15, rotation=-90, labelpad=18),
         'tick_params': dict(axis='both', labelsize=12)},
        # 'tick_params'
        [dict(axis='both', labelsize=12)],
        # 'set_xlabel', 'set_ylabel', 'set_title'
        ['', dict(fontsize=15)], ['MSE', dict(fontsize=15)], [title, dict(fontsize=20)],
        # 'make_table': rounded MSE/MAPE values below the axis
        [dict(cellText=[mse_rounded, mape_rounded], rowLabels=['MSE', 'MAPE (%)'],
              cellLoc='center', bbox=[0, -0.22, 1, 0.1])],
        # 'make_table': feature-set captions (bbox = hor, ver, hor_size, ver_size)
        [dict(cellText=[['Trained with ' + i for i in ('LOB', 'LIQ', 'LOB+LIQ')]],
              cellLoc='center', colLabels=['']*3, edges='vertical', bbox=[0, -0.12, 1, 0.12]),
         {'text_props': dict(set_color=['red'])}],
        # 'legend', 'grid'
        [dict(ncol=1, shadow=1, labelspacing=0.3, fontsize=15, loc=legend_loc)],
        [dict(b=True, axis='x', alpha=0.5)],
    ]


# One axis per forecast quantity; price plots keep the legend top-left,
# variance plots move it bottom-left (the data sits in the upper region there).
args = [
    make_axis_args('mid price', 'Mid Price', 'upper left'),
    make_axis_args('bid price expectation', 'Bid Price Expectation', 'upper left'),
    make_axis_args('ask price expectation', 'Ask Price Expectation', 'upper left'),
    make_axis_args('bid price variance', 'Bid Price Variance', 'lower left'),
    make_axis_args('ask price variance', 'Ask Price Variance', 'lower left'),
]

# Attribute names consumed positionally by plotter -- order matches the
# entries returned by make_axis_args.
attrs = ['plot',
         'hlines',
         'set_xlim', 'set_ylim',
         'make_twiny',
         'tick_params',
         'set_xlabel', 'set_ylabel', 'set_title',
         'make_table',
         'make_table',
         'legend', 'grid']

plotter(args, attrs, fig_title='Validation Performance of Models\n Stock: GARAN\n Year: 2017',
        dpi=300, ncols=1, xpad=-10, ypad=5, hspace=0.32, suptitle_y=0.95)

Histograms and Autocorrelation Analysis of Errors

Below we have histograms and autocorrelation of errors for both feedforward and linear regression models.

For each forecast quantity we have three training cases (LOB, LIQ, LOB+LIQ), resulting in fifteen plots in total.

Each plot consists of two coordinate systems, one belonging to the histograms, the other belonging to the autocorrelation graphs. The axes of histogram and autocorrelation graphs are given different colors for better distinguishability.

We show two types of errors per plot:

  • Squared Error shown above the x-axes of histogram and autocorrelation graphs

  • Relative Error shown below the x-axes of histogram and autocorrelation graphs

Then for each type of error we look at two cases:

  • Training Errors shown at the left side of the y-axes of histogram and autocorrelation graphs

  • Validation Errors shown at the right side of the y-axes of histogram and autocorrelation graphs

Below is a table summarizing what is shown in each quadrant:

Plot Compass
Squared Training Errors Squared Validation Errors
Relative Training Errors Relative Validation Errors

Histograms

In histogram coordinate system we show the error bins on the x-axis and the count of errors that falls into the corresponding error value range on the y-axis.

The error values are divided by their mean value and the error counts are normalized by the total count. The mean values used as reference correspond to the MSE and MAPE values shown in the previous section.

Applying the described procedure,

Squared Error becomes: $\frac{(y_i - \hat{y}_i)^{2}} {\frac{1}{n} \sum^{n}_{k=1} (y_k - \hat{y}_k)^{2}} $

Relative Error becomes: $\frac {|{1 - \frac{\hat{y}_i}{y_i}}|} {\frac{1}{n} \sum^{n}_{k=1} |{1 - \frac{\hat{y}_k}{y_k}}|} $

where $y$: Ground Truth and $\hat{y}$: Prediction.

Autocorrelation

Autocorrelation coefficients of the errors for time lags ranging from 2000 minutes to 18000 minutes can also be seen in the plots. The coefficients are calculated and shown every 2000 minutes, and every 4000 minutes an indicator symbol is added for easier tracking.

The Pearson correlation coefficient formula is used to quantify the autocorrelation of the errors $(X_k)_{1\leq k\leq n}$ for a given lag $l$, where $n$ is the total number of minutes:

$\;\;\;\;\;\;\;\;\;\;\; \rho_l = \frac{1}{n-l}\sum^{n-l}_{i=1} \tilde{X}^{l}_{i} \cdot \tilde{Y}^{l}_{i}$

, where $\tilde{X}^{l} = (\tilde{X}_1,...,\tilde{X}_{n-l})$ and $\tilde{Y}^{l}$ is the delayed version of $\tilde{X} = (\tilde{X}_1,...,\tilde{X}_n)$ by $l$ minutes, $\tilde{Y}^{l}_{k} := \tilde{X}_{l+k}$
and $\tilde{Z} = (\tilde{Z}_k)_{1\leq k\leq N}$ is the standardized version of a random vector $Z = (Z_k)_{1\leq k\leq N}$, $\tilde{Z}_{k} := \frac{Z_k-\mu_Z}{\sqrt{\frac{1}{N}\sum^{N}_{i=1}Z^{2}_{i}-\mu^{2}_Z}}$ with $\mu_Z := \frac{1}{N}\sum^{N}_{i=1}Z_i$.

The time lags are given on the x-axis in minutes, and on the y-axis the absolute value of the correlation coefficient $|\rho|$ is given.

The sign of the correlation coefficient can be read off the tuples $(\text{symbol of error type , sign})$ shown next to the y-axis.
In [4]:
from utils.plotter import plotter
import numpy as np

def acf(x, maxlags, step=1):
    """Sample autocorrelation function of ``x``.

    Returns an array starting with the lag-0 coefficient (1) followed by the
    Pearson correlation of ``x`` with itself shifted by ``step, 2*step, ...``
    up to (but excluding) ``maxlags``.
    """
    coeffs = [1]
    for lag in range(step, maxlags, step):
        coeffs.append(np.corrcoef(x[:-lag], x[lag:])[0, 1])
    return np.array(coeffs)

def get_xlim(errors_h, errors, tol):
    """Return the bin value of the right-most histogram bin whose count
    strictly exceeds ``tol`` times the maximum count.

    ``errors_h`` are bin counts, ``errors`` the corresponding bin values.
    Returns None when no count exceeds the threshold (only possible for
    ``tol >= 1``).
    """
    masked = np.where(errors_h > tol * errors_h.max(), errors_h, 0)
    surviving = np.nonzero(masked[::-1])[0]
    if surviving.size:
        return errors[len(errors) - surviving[0] - 1]
        
def get_axis_args(data_type,observable,bins,xlim_tol,maxlags,**kwargs):
    """Build the plotter argument list for one histogram + autocorrelation axis.

    Loads the saved NN and LinReg error arrays for ``data_type``, normalizes
    each series by its mean, histograms them, computes autocorrelations, and
    packs everything into the positional argument list consumed by
    ``utils.plotter.plotter`` (entry order must match the ``attrs`` list
    passed alongside it).

    Parameters
    ----------
    data_type : str
        Training feature set: 'LOB', 'LIQ' or 'LOB+LIQ'.
    observable : str
        Forecast quantity (e.g. 'Mid Price'); selects the error column.
    bins : int, sequence or str
        Bin specification forwarded to ``np.histogram`` (e.g. 'fd').
    xlim_tol : float
        Bins with counts below ``xlim_tol * max(count)`` are ignored when
        choosing the histogram x-limit.
    maxlags : int
        Largest autocorrelation lag (in minutes) to compute.
    **kwargs
        acorr_step : int, default 1 -- lag spacing in minutes.
        ax_suptitle : str, default '' -- optional title drawn above the axis.

    Returns
    -------
    list
        One-element list containing the per-axis argument list.
    """

    obs_dict = {'Mid Price':0,'Bid Price Expectation':1,'Ask Price Expectation':2,'Bid Price Variance':3,'Ask Price Variance':4}
    n = obs_dict[observable]  # column index of the observable in the saved error arrays
    data_type_dict = {'LOB':'a) ','LIQ':'b) ','LOB+LIQ':'c) '}  # subplot label prefixes

    acorr_step = kwargs.get('acorr_step',1)
    acorr_start = 1 # first plotted lag, expressed as a multiple of acorr_step
    pf = (maxlags-acorr_start*acorr_step)//acorr_step//4  # marker stride: indicator symbol every 4th coefficient
    ps = 6  # marker size for autocorrelation symbols
    lw=1  # line width for autocorrelation curves
    maxlags += 1  # make the lag range inclusive of maxlags

    # Error files are pickled dicts keyed by 'se_train', 'se_val', 'ape_train', 'ape_val'.
    with open('./eval/models/errors/' + f'{data_type}.npy', 'rb') as input:
        nn_errors = np.load(input,allow_pickle='TRUE').item()

    with open('./eval/models/errors/' + f'LinReg_{data_type}.npy', 'rb') as input:
        linreg_errors = np.load(input,allow_pickle='TRUE').item()

    # Normalize each error series by its mean so histograms show errors
    # relative to the mean (the means are the MSE/MAPE values of the tables).
    se_train = nn_errors['se_train'][:,n]           ; se_train /= se_train.mean()
    se_train_reg = linreg_errors['se_train'][:,n]   ; se_train_reg /= se_train_reg.mean()
    se_val = nn_errors['se_val'][:,n]               ; se_val /= se_val.mean()
    se_val_reg = linreg_errors['se_val'][:,n]       ; se_val_reg /= se_val_reg.mean()
    ape_train = nn_errors['ape_train'][:,n]         ; ape_train /= ape_train.mean()
    ape_train_reg = linreg_errors['ape_train'][:,n] ; ape_train_reg /= ape_train_reg.mean()
    ape_val = nn_errors['ape_val'][:,n]             ; ape_val /= ape_val.mean()
    ape_val_reg = linreg_errors['ape_val'][:,n]     ; ape_val_reg /= ape_val_reg.mean()

    # Histogram each series; keep left bin edges (scaled to percent) and
    # normalize counts to percentages of the total count.
    train_h , train= np.histogram(se_train,bins)              ; train = train[:-1] * 100         ; train_h = train_h/train_h.sum() * 100
    train_h_reg , train_reg = np.histogram(se_train_reg,bins) ; train_reg = train_reg[:-1] * 100 ; train_h_reg = train_h_reg/train_h_reg.sum() * 100
    val_h , val= np.histogram(se_val,bins)                    ; val = val[:-1] * 100             ; val_h = val_h/val_h.sum() * 100
    val_h_reg , val_reg = np.histogram(se_val_reg,bins)       ; val_reg = val_reg[:-1] * 100     ; val_h_reg = val_h_reg/val_h_reg.sum() * 100

    train_h_ape , train_ape= np.histogram(ape_train,bins)              ; train_ape = train_ape[:-1] * 100         ; train_h_ape = train_h_ape/train_h_ape.sum() * 100
    train_h_reg_ape , train_reg_ape = np.histogram(ape_train_reg,bins) ; train_reg_ape = train_reg_ape[:-1] * 100 ; train_h_reg_ape = train_h_reg_ape/train_h_reg_ape.sum() * 100
    val_h_ape , val_ape = np.histogram(ape_val,bins)                   ; val_ape = val_ape[:-1] * 100             ; val_h_ape = val_h_ape/val_h_ape.sum() * 100
    val_h_reg_ape , val_reg_ape = np.histogram(ape_val_reg,bins)       ; val_reg_ape = val_reg_ape[:-1] * 100     ; val_h_reg_ape = val_h_reg_ape/val_h_reg_ape.sum() * 100

    # Common x-limit: largest significant bin edge over all eight histograms.
    xlim= max([get_xlim(i,k,xlim_tol) for i,k in zip([train_h,train_h_reg,val_h,val_h_reg,train_h_ape,train_h_reg_ape,val_h_ape,val_h_reg_ape],[train,train_reg,val,val_reg,train_ape,train_reg_ape,val_ape,val_reg_ape])])

    # Autocorrelation coefficients; drop the lag-0 entry via [acorr_start:].
    ac_se_train = acf(se_train,maxlags,acorr_step)[acorr_start:]   ; ac_se_train_reg = acf(se_train_reg,maxlags,acorr_step)[acorr_start:]
    ac_ape_train = acf(ape_train,maxlags,acorr_step)[acorr_start:]  ; ac_ape_train_reg = acf(ape_train_reg,maxlags,acorr_step)[acorr_start:] 
    ac_se_val = acf(se_val,maxlags,acorr_step)[acorr_start:]        ; ac_se_val_reg = acf(se_val_reg,maxlags,acorr_step)[acorr_start:]
    ac_ape_val = acf(ape_val,maxlags,acorr_step)[acorr_start:]      ; ac_ape_val_reg = acf(ape_val_reg,maxlags,acorr_step) [acorr_start:]

    # Positive lags plot on the validation (right) side, negated lags on the
    # training (left) side of the shared autocorrelation axis.
    acorr_range = [*range(acorr_start*acorr_step, maxlags,acorr_step)] 
    acorr_range_neg = [*range(-acorr_start*acorr_step, -maxlags,-acorr_step)] 

    # Symmetric y-limit for the autocorrelation axis with a small margin.
    ylim_2nd = max(ac_se_train.max(),ac_ape_train.max(),ac_se_val.max(),ac_ape_val.max(),ac_se_train_reg.max(),ac_ape_train_reg.max(),ac_se_val_reg.max(),ac_ape_val_reg.max())
    ylim_2nd = round(ylim_2nd,2)
    ylim_2nd += 0.2
    # Entry order below must match the `attrs` list passed to plotter:
    # bars (squared above, relative below), legend proxies, autocorrelation
    # markers/lines, limits, axis lines, ticks, labels, texts, colors, legend, grid.
    axis_args = [
            [ 
              [-train,train_h,-np.diff(train)[0],dict(align='edge',color='blue',alpha=0.8)]              ,[val,val_h,np.diff(val)[0],dict(align='edge',color='tomato',alpha=0.8)]              ,[-train_reg,train_h_reg,-np.diff(train_reg)[0],dict(align='edge',color='gold',alpha=0.8)]               ,[val_reg,val_h_reg,np.diff(val_reg)[0],dict(align='edge',color='limegreen',alpha=0.8)]
             ,[-train_ape,-train_h_ape,-np.diff(train_ape)[0],dict(align='edge',color='blue',alpha=0.4)] ,[val_ape,-val_h_ape,np.diff(val_ape)[0],dict(align='edge',color='tomato',alpha=0.4)] ,[-train_reg_ape,-train_h_reg_ape,-np.diff(train_reg_ape)[0],dict(align='edge',color='gold',alpha=0.4)]  ,[val_reg_ape,-val_h_reg_ape,np.diff(val_reg_ape)[0],dict(align='edge',color='limegreen',alpha=0.4)]
             ,[[],'-',dict(marker='o',color='blue',linewidth=2,alpha=0.5,ls='none',fillstyle='left')]                         ,[[],'-',dict(marker='o',color='tomato',linewidth=2,alpha=0.5,ls='none',fillstyle='right')]                ,[[],'-',dict(marker='o',color='gold',linewidth=2,alpha=0.5,ls='none',fillstyle='left')]                                      ,[[],'-',dict(marker='o',color='limegreen',linewidth=2,alpha=0.5,ls='none',fillstyle='right')]
             \
             ,[acorr_range_neg[::pf],ac_se_train[::pf]  ,'o',dict(color='blue',markersize=ps)]               ,[acorr_range[::pf],ac_se_val[::pf]  ,'o',dict(color='crimson',markersize=ps)]                ,[acorr_range_neg[::pf],ac_se_train_reg[::pf]  ,'o',dict(color='gold',markersize=ps)]               ,[acorr_range[::pf],ac_se_val_reg[::pf]  ,'o',dict(color='limegreen',markersize=ps)]
             ,[acorr_range_neg,ac_se_train              ,'-',dict(color='blue',alpha=0.5,linewidth=lw)]    ,[acorr_range,ac_se_val              ,'-',dict(color='crimson',alpha=0.5,linewidth=lw)]     ,[acorr_range_neg,ac_se_train_reg              ,'-',dict(color='gold',alpha=1,linewidth=lw)]            ,[acorr_range,ac_se_val_reg             ,'-',dict(color='limegreen',alpha=1,linewidth=lw)]
             ,[acorr_range_neg[::pf],-ac_ape_train[::pf],'D',dict(color='blue',markersize=ps)]               ,[acorr_range[::pf],-ac_ape_val[::pf],'D',dict(color='crimson',markersize=ps)]                ,[acorr_range_neg[::pf],-ac_ape_train_reg[::pf],'D',dict(color='gold',markersize=ps)]               ,[acorr_range[::pf],-ac_ape_val_reg[::pf],'D',dict(color='limegreen',markersize=ps)]
             ,[acorr_range_neg,-ac_ape_train            ,'--',dict(color='blue',alpha=0.5,linewidth=lw)]   ,[acorr_range,-ac_ape_val            ,'--',dict(color='crimson',alpha=0.5,linewidth=lw)]    ,[acorr_range_neg,-ac_ape_train_reg            ,'--',dict(color='gold',alpha=1,linewidth=lw)]           ,[acorr_range,-ac_ape_val_reg          ,'--',dict(color='limegreen',alpha=1,linewidth=lw)]
             ,[[],dict(marker='o',color='black',ls='solid',fillstyle='none')],[[],dict(marker='D',color='black',ls='dashed',fillstyle='none')]
             \
             ,[-xlim,xlim],[-ylim_2nd,ylim_2nd]
             ,[dict(color='chocolate', lw=1)],[dict(color='chocolate', lw=1)],[dict(color='magenta', lw=0.8)]
             ,[acorr_range_neg[::pf]+acorr_range[::pf]]
             ,dict(x='positive',y='positive'),dict(x='positive',y='positive')
             ,['Errors Relative to the Mean (%)',dict(fontsize=15,color='sienna')],['Normalized Error Counts (%)',dict(fontsize=15,color='sienna')],[data_type_dict[data_type]+data_type,dict(fontsize=20,pad=60)]
             ,['Time Lag in Minutes',dict(fontsize=15,color='darkmagenta',labelpad=5)],[r'Autocorr. Coeff. Magnitude',dict(fontsize=15,rotation=-90,color='darkmagenta',labelpad=15)]
             ,[[-0.06, 1, 'Squared Errors'],dict(fontsize=20,rotation=90,horizontalalignment='right',verticalalignment='top')],[[-0.06, 0, 'Relative Errors'],dict(fontsize=20,rotation=90,horizontalalignment='right',verticalalignment='bottom')],[[0.01, -0.05, 'Training Side'],dict(color='black',fontsize=20,horizontalalignment='left',verticalalignment='top')],[[0.99, -0.05, 'Validation Side'],dict(color='black',fontsize=20,horizontalalignment='right',verticalalignment='top')]
             ,[[1.08, 0.72, r'$(\circ,\plus) ; (\diamond,\minus)$'],dict(color='darkmagenta',fontsize=20,horizontalalignment='right',verticalalignment='bottom',rotation=-90)],[[1.08, 0.28, r'$(\circ,\minus) ; (\diamond,\plus)$'],dict(color='darkmagenta',fontsize=20,horizontalalignment='right',verticalalignment='top',rotation=-90)]
             ,[[0.5, 1.2, kwargs.get('ax_suptitle','')],dict(color='black',fontsize=25,horizontalalignment='center',verticalalignment='bottom')]
             ,dict(color='sienna'),dict(color='darkmagenta'),[dict(axis='x',colors='darkmagenta',rotation=90,direction='in')],[dict(axis='y',colors='darkmagenta',direction='in')],[dict(axis='x',colors='sienna',direction='in')],[dict(axis='y',colors='sienna',direction='in')]
             ,[dict(line_order = [[0, 1], [2, 3],[4],[5]],labels=['NN','LinReg','Squared Err. Autocorr.','Rel. Err. Autocorr.'] ,ncol=2,shadow=1,columnspacing=0.5,labelspacing=1,fontsize=12,loc='upper right')],[dict(b=True,axis='both',alpha=0.5)]
            ]
           ]

    return axis_args


args = []

# One axis per (observable, feature set): 5 observables x 3 feature sets = 15 plots.
for obs in ['Mid Price','Bid Price Expectation','Ask Price Expectation','Bid Price Variance','Ask Price Variance']:
    for i,data_type in enumerate(['LOB','LIQ','LOB+LIQ']):
        # ax_suptitle is non-empty only for the middle column (i == 1), so the
        # observable name appears once per row of three plots.
        args += get_axis_args(data_type,obs,'fd',0.1,maxlags=18000,acorr_step=2000,ax_suptitle=obs*int(i%3==1))


# Attribute names consumed positionally by plotter -- the order must match
# the entries produced by get_axis_args.
attrs = [
          'bar','bar','bar','bar'
         ,'bar','bar','bar','bar'
         ,'plot','plot','plot','plot'#just for legend
         \
         ,'2nd_plot','2nd_plot','2nd_plot','2nd_plot' # circle markers
         ,'2nd_plot','2nd_plot','2nd_plot','2nd_plot' # connecting lines for circles
         ,'2nd_plot','2nd_plot','2nd_plot','2nd_plot' # diamond markers
         ,'2nd_plot','2nd_plot','2nd_plot','2nd_plot' # connecting lines for diamonds
         ,'plot','plot'#just for legend
         \
         ,'set_xlim','2nd_set_ylim'
         ,'axvline','axhline','2nd_axhline'
         ,'2nd_set_xticks'
         ,'ticks','2nd_ticks'
         ,'set_xlabel','set_ylabel','set_title'
         ,'2nd_set_xlabel','2nd_set_ylabel'
         ,'text','text','text','text'
         ,'text','text'
         ,'text' # for the per-column axis suptitle
         ,'color_ax','2nd_color_ax','2nd_tick_params','2nd_tick_params','tick_params','tick_params'
         ,'legend','grid'
        ]

plotter(args,attrs,second_plot=1,fig_title='Distributions and Autocorrelation of Errors\n Stock: GARAN\n Year: 2017',dpi=300, ncols=3,xpad=10,ypad=25, hspace = 0.45 ,suptitle_y=0.95,suptitle_x=0.51)